Data augmentation with numpy¶

Data Augmentation are “Techniques used to increase the amount of data by adding slightly modified copies of already existing data or newly created synthetic data from existing data.”

-> Data Augmentation is about making minimal changes to existing data to create new representative data.

-> Why? : improve performance and outcomes of machine learning models by forming new and different examples to train datasets. If dataset in a machine learning model is rich and sufficient, the model performs better and more accurate.

Importing the necessary Libraries for the first part

In [13]:
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import glob

The first part :¶

Functions :¶

10 Transformation Techniques :

Random Rotating, Vertical Flipping, Horizontal Flipping, Translation along X, Translation along Y, Cropping, Zooming, Color Modification, Adding Light Gaussian Noise, Adding Illumination.

In [2]:
def random_rotating(src_img):
    rot_img = np.rot90(src_img,np.random.randint(1,4))
    return rot_img
In [3]:
# Vertical Flipping
def vertical_flipping(src_img):
    im_V_flipp = np.array(list(reversed(src_img))) #reverse matrix
    return im_V_flipp
In [4]:
# Horizontal Flipping
def horizontal_flipping(src_img):
    im_H_flipp = np.array([list(reversed(row)) for row in src_img])
    return im_H_flipp
In [5]:
# Translation Along X
def translation_along_x(src_img,shift_dist,img_shape):
    height, width = src_img.shape[:2]
    x_dist=shift_dist[0]
    y_dist=shift_dist[1]
    img_final=np.zeros(src_img.shape, dtype="uint8")
    matrix_trans=np.array([[1,0,x_dist],[0,1,y_dist]])
    for i in range(height):
        for j in range(width):
            x_old=j
            y_old=i
            xy_old=np.array([x_old,y_old,1])
            xy=np.dot(matrix_trans,xy_old)
            x=xy[0]
            y=xy[1]
            
            if (0<x<width) and (0<y<height):
                img_final[y,x]=src_img[i,j]
    return img_final
In [6]:
# Translation Along Y
def translation_along_y(src_img,shift_dist,img_shape):
    height, width = src_img.shape[:2]
    x_dist=shift_dist[0]
    y_dist=shift_dist[1]
    img_final=np.zeros(src_img.shape, dtype="uint8")
    matrix_trans=np.array([[1,0,x_dist],[0,1,y_dist]])
    for i in range(height):
        for j in range(width):
            x_old=j
            y_old=i
            xy_old=np.array([x_old,y_old,1])
            xy=np.dot(matrix_trans,xy_old)
            x=xy[0]
            y=xy[1]
            
            if (0<x<width) and (0<y<height):
                img_final[y,x]=src_img[i,j]
    return img_final
In [36]:
#Zooming
def zoom(src_img, w_start,w_end, h_start,h_end):
    zoom_image = np.zeros(src_img.shape,src_img.dtype)
    zoom_image = src_img[w_start:w_end, h_start:h_end]
    return zoom_image
In [22]:
#Cropping
def crop(src_img, x, y, width, height):
    new_image = np.zeros(src_img.shape,src_img.dtype)
    new_image = src_img[y:y + height, x:x+width]
    return new_image
In [9]:
# Color Modification
def ColorModification(src_img):
    fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(20,8))
 
    for c, ax in zip(range(3), axs):
        rgb_img = np.zeros(src_img.shape, dtype="uint8")
        rgb_img[:,:,c] = src_img[:,:,c]
        ax.imshow(rgb_img)
        ax.set_axis_off()
In [10]:
# Adding Light Gaussian Noise
def lightGaussianNoise(src_img):
    f = src_img/255
    x,y,z= f.shape
    mean = 0
    var = 0.3
    sigma = np.sqrt(var)
    noise = np.random.normal(loc=mean, scale=sigma,size=(x,y,z))
    im_gauss_noise = f + noise
    return im_gauss_noise
In [17]:
# Adding Illumination
def addIllumination(src_img, contrast, bright):
    new_image = np.zeros(src_img.shape, src_img.dtype)
    for i in range(src_img.shape[0]):
          for j in range(src_img.shape[1]):
                for k in range(src_img.shape[2]):
                      new_image[i,j,k] = np.clip(contrast*src_img[i,j,k] + bright, 0, 255)  
    return new_image

Dogs¶

Acces to the Dog folder and iterate over with original images and apply the transformations on evey image and save 70% of the augmented images as Training Data and 30% as Testing Data.

In [14]:
Dog_path = r'C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/Dog/*.jpg'
Dog_images = [plt.imread(image) for image in glob.glob(Dog_path)]
In [224]:
directory_save="C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/Train_Images/Dog/"
directory_save_test="C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/Test_Images/Dog/"
for i in range(len(Dog_images)):
        Image.fromarray(random_rotating(Dog_images[i]).astype(np.uint8)).save(directory_save+"Rotating_Image"+str(i)+".jpg",'JPEG')
        Image.fromarray(crop(Dog_images[i], 100, 300, 150, 400)).save(directory_save+"Image_crop"+str(i)+".jpg",'JPEG')
        Image.fromarray(translation_along_x(Dog_images[i],(50,0),Dog_images[i].shape)).save(directory_save+"Image_translation_along_x"+str(i)+".jpg",'JPEG')
        Image.fromarray(translation_along_y(Dog_images[i],(50,0),Dog_images[i].shape)).save(directory_save+"Image_translation_along_y"+str(i)+".jpg",'JPEG') 
        Image.fromarray(lightGaussianNoise(Dog_images[i]).astype(np.uint8)).save(directory_save+"Noisy_Image"+str(i)+".jpg",'JPEG') 
        Image.fromarray(zoom(Dog_images[i],100,300,150,400)).save(directory_save+"Image_zoom"+str(i)+".jpg",'JPEG') 
        #Image.fromarray(ColorModification(Dog_images[i])).save(directory_save+"Image_colorModif"+str(i)+".jpg",'JPEG') 
        Image.fromarray(horizontal_flipping(Dog_images[i])).save(directory_save_test+"H_Flipping"+str(i)+".jpg",'JPEG')
        Image.fromarray(vertical_flipping(Dog_images[i])).save(directory_save_test+"H_Flip_Flipping"+str(i)+".jpg",'JPEG')
        Image.fromarray(addIllumination(Dog_images[i],1,90)).save(directory_save_test+"Illuminated_Image"+str(i)+".jpg",'JPEG')

Tree¶

Acces to the Tree folder and iterate over with original images and apply the transformations on evey image and save 70% of the augmented images as Training Data and 30% as Testing Data.

In [15]:
Tree_path = r'C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/Tree/*.jpg'
Tree_images = [plt.imread(image) for image in glob.glob(Tree_path)]
In [223]:
directory_save="C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/Train_Images/Tree/"
directory_save_test="C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/Test_Images/Tree/"
for i in range(len(Tree_images)):
        Image.fromarray(random_rotating(Tree_images[i]).astype(np.uint8)).save(directory_save+"Rotating_Image"+str(i)+".jpg",'JPEG')
        Image.fromarray(crop(Tree_images[i], 100, 300, 150, 400)).save(directory_save+"Image_crop"+str(i)+".jpg",'JPEG')
        Image.fromarray(translation_along_x(Tree_images[i],(50,0),Tree_images[i].shape)).save(directory_save+"Image_translation_along_x"+str(i)+".jpg",'JPEG')
        Image.fromarray(translation_along_y(Tree_images[i],(50,0),Tree_images[i].shape)).save(directory_save+"Image_translation_along_y"+str(i)+".jpg",'JPEG') 
        Image.fromarray(lightGaussianNoise(Tree_images[i]).astype(np.uint8)).save(directory_save+"Noisy_Image"+str(i)+".jpg",'JPEG') 
        Image.fromarray(zoom(Tree_images[i],100,300,150,400)).save(directory_save+"Image_zoom"+str(i)+".jpg",'JPEG') 
        #Image.fromarray(ColorModification(Tree_images[i])).save(directory_save+"Image_colorModif"+str(i)+".jpg",'JPEG') 
        Image.fromarray(horizontal_flipping(Tree_images[i])).save(directory_save_test+"H_Flipping"+str(i)+".jpg",'JPEG')
        Image.fromarray(vertical_flipping(Tree_images[i])).save(directory_save_test+"H_Flip_Flipping"+str(i)+".jpg",'JPEG')
        Image.fromarray(addIllumination(Tree_images[i],1,90)).save(directory_save_test+"Illuminated_Image"+str(i)+".jpg",'JPEG')

Data Visualization¶

Visualize some images to comparing with the original image.

In [24]:
# Image 0 : Dog
ColorModification(Dog_images[0])
rotating_image = random_rotating(Dog_images[0])
Illuminated_dog0 = addIllumination(Dog_images[0],1,90)
gussian_dog0 = lightGaussianNoise(Dog_images[0])
V_Flip_dog0 = vertical_flipping(Dog_images[0])
H_Flip_dog0 = horizontal_flipping(Dog_images[0])
translation_X_dog0 = translation_along_x(Dog_images[0],(50,0),Dog_images[0].shape)
translation_Y_dog0 = translation_along_y(Dog_images[0],(0,50),Dog_images[0].shape)
zoom_image_dog0 = zoom(Dog_images[0],233,500,150,400)
cropped_image_dog0 = crop(Dog_images[0], 250, 200, 450, 250) 
In [25]:
#Visalization Image 0 : Dog
plt.imshow(Dog_images[0])
plt.title("Original Image")
plt.show()

plt.imshow(Illuminated_dog0)
plt.title("Illuminated Image")
plt.show()


plt.imshow(gussian_dog0)
plt.title("Noisy Image")
plt.show()

plt.imshow(V_Flip_dog0)
plt.title("Vertical Flipped Image")
plt.show()


plt.imshow(H_Flip_dog0)
plt.title("Horizontal Flipped Image")
plt.show()


plt.imshow(translation_X_dog0)
plt.title("Translating Image along X")
plt.show()


plt.imshow(translation_Y_dog0)
plt.title("Translating Image along Y")
plt.show()


plt.imshow(zoom_image_dog0)
plt.title("Zoomed Image")
plt.show()


plt.imshow(cropped_image_dog0)
plt.title("Cropped Image")
plt.show()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
In [26]:
# Image 1 : Dog
ColorModification(Dog_images[1])
Illuminated_dog1 = addIllumination(Dog_images[1],1,90)
gussian_dog1 = lightGaussianNoise(Dog_images[1])
V_Flip_dog1 = vertical_flipping(Dog_images[1])
H_Flip_dog1 = horizontal_flipping(Dog_images[1])
translation_X_dog1 = translation_along_x(Dog_images[1],(50,0),Dog_images[1].shape)
translation_Y_dog1 = translation_along_y(Dog_images[1],(0,50),Dog_images[1].shape)
zoom_image_dog1 = zoom(Dog_images[1],233,500,150,400)
cropped_image_dog1 = crop(Dog_images[1], 250, 200, 450, 250) 
In [27]:
#Visalization Image 1 : Dog
plt.imshow(Dog_images[1])
plt.title("Original Image")
plt.show()

plt.imshow(Illuminated_dog1)
plt.title("Illuminated Image")
plt.show()


plt.imshow(gussian_dog1)
plt.title("Noisy Image")
plt.show()

plt.imshow(V_Flip_dog1)
plt.title("Vertical Flipped Image")
plt.show()


plt.imshow(H_Flip_dog1)
plt.title("Horizontal Flipped Image")
plt.show()


plt.imshow(translation_X_dog1)
plt.title("Translating Image along X")
plt.show()


plt.imshow(translation_Y_dog1)
plt.title("Translating Image along Y")
plt.show()


plt.imshow(zoom_image_dog1)
plt.title("Zoomed Image")
plt.show()


plt.imshow(cropped_image_dog1)
plt.title("Cropped Image")
plt.show()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
In [28]:
# Image 2 : Dog
ColorModification(Dog_images[2])
Illuminated_dog2 = addIllumination(Dog_images[2],1,90)
gussian_dog2 = lightGaussianNoise(Dog_images[2])
V_Flip_dog2 = vertical_flipping(Dog_images[2])
H_Flip_dog2 = horizontal_flipping(Dog_images[2])
translation_X_dog2 = translation_along_x(Dog_images[2],(50,0),Dog_images[2].shape)
translation_Y_dog2 = translation_along_y(Dog_images[2],(0,50),Dog_images[2].shape)
zoom_image_dog2 = zoom(Dog_images[2],233,500,150,400)
cropped_image_dog2 = crop(Dog_images[2], 250, 200, 450, 250) 
In [131]:
#Visalization Image 2 : Dog
plt.imshow(Dog_images[2])
plt.title("Original Image")
plt.show()

plt.imshow(Illuminated_dog2)
plt.title("Illuminated Image")
plt.show()


plt.imshow(gussian_dog2)
plt.title("Noisy Image")
plt.show()

plt.imshow(V_Flip_dog2)
plt.title("Vertical Flipped Image")
plt.show()


plt.imshow(H_Flip_dog2)
plt.title("Horizontal Flipped Image")
plt.show()


plt.imshow(translation_X_dog2)
plt.title("Translating Image along X")
plt.show()


plt.imshow(translation_Y_dog2)
plt.title("Translating Image along Y")
plt.show()


plt.imshow(zoom_image_dog2)
plt.title("Zoomed Image")
plt.show()


plt.imshow(cropped_image_dog2)
plt.title("Cropped Image")
plt.show()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
In [30]:
# Image 0 : Tree
ColorModification(Tree_images[0])
Illuminated_Tree0 = addIllumination(Tree_images[0],1,90)
gussian_Tree0 = lightGaussianNoise(Tree_images[0])
V_Flip_Tree0 = vertical_flipping(Tree_images[0])
H_Flip_Tree0 = horizontal_flipping(Tree_images[0])
translation_X_Tree0 = translation_along_x(Tree_images[0],(50,0),Tree_images[0].shape)
translation_Y_Tree0 = translation_along_y(Tree_images[0],(0,50),Tree_images[0].shape)
zoom_image_Tree0 = zoom(Tree_images[0],100,300,150,400)
cropped_image_Tree0 = crop(Tree_images[0], 100, 300, 150, 400) 
In [34]:
#Visalization Image 0 : Tree
plt.imshow(Tree_images[0])
plt.title("Original Image")
plt.show()

plt.imshow(Illuminated_Tree0)
plt.title("Illuminated Image")
plt.show()


plt.imshow(gussian_Tree0)
plt.title("Noisy Image")
plt.show()

plt.imshow(V_Flip_Tree0)
plt.title("Vertical Flipped Image")
plt.show()


plt.imshow(H_Flip_Tree0)
plt.title("Horizontal Flipped Image")
plt.show()


plt.imshow(translation_X_Tree0)
plt.title("Translating Image along X")
plt.show()


plt.imshow(translation_Y_Tree0)
plt.title("Translating Image along Y")
plt.show()


plt.imshow(zoom_image_Tree0)
plt.title("Zoomed Image")
plt.show()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
In [33]:
# Image 1 : Tree
ColorModification(Tree_images[1])
Illuminated_Tree1 = addIllumination(Tree_images[1],1,90)
gussian_Tree1 = lightGaussianNoise(Tree_images[1])
V_Flip_Tree1 = vertical_flipping(Tree_images[1])
H_Flip_Tree1 = horizontal_flipping(Tree_images[1])
translation_X_Tree1 = translation_along_x(Tree_images[1],(50,0),Tree_images[1].shape)
translation_Y_Tree1 = translation_along_y(Tree_images[1],(0,50),Tree_images[1].shape)
zoom_image_Tree1 = zoom(Tree_images[1],100,300,150,400)
cropped_image_Tree1 = crop(Tree_images[1], 100, 300, 150, 400) 
In [35]:
#Visalization Image 1 : Tree
plt.imshow(Tree_images[1])
plt.title("Original Image")
plt.show()

plt.imshow(Illuminated_Tree1)
plt.title("Illuminated Image")
plt.show()


plt.imshow(gussian_Tree1)
plt.title("Noisy Image")
plt.show()

plt.imshow(V_Flip_Tree1)
plt.title("Vertical Flipped Image")
plt.show()


plt.imshow(H_Flip_Tree1)
plt.title("Horizontal Flipped Image")
plt.show()


plt.imshow(translation_X_Tree1)
plt.title("Translating Image along X")
plt.show()


plt.imshow(translation_Y_Tree1)
plt.title("Translating Image along Y")
plt.show()


plt.imshow(zoom_image_Tree1)
plt.title("Zoomed Image")
plt.show()


plt.imshow(cropped_image_Tree1)
plt.title("Cropped Image")
plt.show()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).

Histograms¶

In [247]:
# One of tree images
plt.imshow(Tree_images[1])
plt.title("Original Image")
plt.show()

# Histogram for pixel intensity (Original Image)
plt.subplot(3,1,1)
img_flat = Tree_images[1].flatten()
plt.hist(img_flat, bins=200, range=[0, 256])
plt.title("Number of pixels in each intensity value")
plt.xlabel("Intensity value")
plt.ylabel("Number of pixels")
plt.show()

plt.subplot(3,1,3)
# Histogram for pixel intensity (Illuminated Image)
Illuminated_Tree1 =Illuminated_Tree1.flatten()
plt.hist(img_ill, bins=200, range=[0, 256])
plt.title("Number of pixels in each intensity value")
plt.xlabel("Intensity value")
plt.ylabel("Number of pixels")
plt.show()
In [253]:
# One of Dog images
plt.imshow(Dog_images[2])
plt.title("Original Image")
plt.show()

# Histogram for pixel intensity (Original Image)
plt.subplot(3,1,1)
img_flat = Dog_images[1].flatten()
plt.hist(img_flat, bins=200, range=[0, 256])
plt.title("Number of pixels in each intensity value")
plt.xlabel("Intensity value")
plt.ylabel("Number of pixels")
plt.show()


plt.subplot(3,1,3)
# Histogram for pixel intensity (Gaussian Noise Image)
gussian_dog1 = gussian_dog1.flatten()
plt.hist(img_ill, bins=200, range=[0, 256])
plt.title("Number of pixels in each intensity value")
plt.xlabel("Intensity value")
plt.ylabel("Number of pixels")
plt.show()
In [77]:
# Image Colors Histograms(RGB)
red_color = image[:,:,0]
green_color = image[:,:,1]
blue_color = image[:,:,2]

plt.subplot(4, 1, 1)
plt.title("histogram of Original image")
plt.hist(image.ravel(), color="yellow")

# Separate Histograms for each color
plt.subplot(4, 1, 2)
plt.title("histogram of Blue")
plt.hist(blue_color.ravel(), color="blue")

plt.subplot(4, 1, 3)
plt.title("histogram of Green")
plt.hist(green_color.ravel(), color="green")
 
plt.subplot(4, 1, 4)
plt.title("histogram of Red")
plt.hist(red_color.ravel(), color="red")
 
# for clear view
plt.tight_layout()
plt.show()
 
# combined histogram
plt.title("Histogram of all RGB Colors")
plt.hist(blue_color.ravel(), color="b")
plt.hist(green_color.ravel(), color="g")
plt.hist(red_color.ravel(), color="r")
plt.show()
In [254]:
def Visualize(im,im1):
    fig, ax = plt.subplots(2, 2)
    ax[0][0].imshow(im)
    ax[0][1].imshow(im1)
    ax[1][0].hist(im.ravel(),256,[0,256])
    ax[1][1].hist(im1.ravel(),256,[0,256])
    ax[0][0].set_ylabel('Original Image')
    ax[0][1].set_ylabel('Augmented Image')
    colors = ("red", "green", "blue")
    plt.figure()
    plt.xlim([0, 256])
    for channel_id, color in enumerate(colors):
        histogram, bin_edges = np.histogram(
            im1[:, :, channel_id], bins=256, range=(0, 256)
        )
        plt.plot(bin_edges[0:-1], histogram, color=color)
    plt.title("Color Histogram of the augmented image")
    plt.xlabel("Color value")
    plt.ylabel("Pixel count")
In [257]:
Visualize(Dog_images[1],Illuminated_dog1)

Binary Classification¶

To categorize the photographs, we'll build a deep learning model using the Tensorflow library.

In [98]:
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow
from tensorflow.keras.layers import *
import os
from keras.models import Sequential
import numpy as np
import keras
from tensorflow.keras.preprocessing import image
from keras.utils.vis_utils import plot_model
from tensorflow.keras.optimizers import RMSprop
from sklearn.metrics import classification_report
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train=ImageDataGenerator(rescale=1/255)
validation=ImageDataGenerator(rescale=1/255)
#import the train and the test data
train_dataset=train.flow_from_directory('C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/Train_Images',target_size=(224,224),batch_size=3,class_mode='binary')
Test_dataset=train.flow_from_directory('C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/Test_Images',target_size=(224,224),batch_size=3,class_mode='binary')
Found 80 images belonging to 2 classes.
Found 30 images belonging to 2 classes.
In [99]:
Test_dataset.classes
Out[99]:
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1])
In [100]:
model = Sequential()
model.add(Conv2D(16,(3,3),activation='relu',input_shape=(224,224,3))) #we have 16 filter 
model.add(MaxPool2D(2,2))

model.add(Conv2D(32,(3,3),activation='relu'))#we then learn 32 filters
model.add(MaxPool2D(2,2))

model.add(Conv2D(64,(3,3),activation='relu')) #The final Conv2D layer learns 64 filters
model.add(MaxPool2D(2,2))

model.add(Flatten())
model.add(Dense(512,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.summary()
Model: "sequential_6"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_18 (Conv2D)          (None, 222, 222, 16)      448       
                                                                 
 max_pooling2d_18 (MaxPoolin  (None, 111, 111, 16)     0         
 g2D)                                                            
                                                                 
 conv2d_19 (Conv2D)          (None, 109, 109, 32)      4640      
                                                                 
 max_pooling2d_19 (MaxPoolin  (None, 54, 54, 32)       0         
 g2D)                                                            
                                                                 
 conv2d_20 (Conv2D)          (None, 52, 52, 64)        18496     
                                                                 
 max_pooling2d_20 (MaxPoolin  (None, 26, 26, 64)       0         
 g2D)                                                            
                                                                 
 flatten_6 (Flatten)         (None, 43264)             0         
                                                                 
 dense_12 (Dense)            (None, 512)               22151680  
                                                                 
 dense_13 (Dense)            (None, 1)                 513       
                                                                 
=================================================================
Total params: 22,175,777
Trainable params: 22,175,777
Non-trainable params: 0
_________________________________________________________________
In [101]:
opt = keras.optimizers.RMSprop(learning_rate=0.001)
model.compile(loss='binary_crossentropy',optimizer=opt ,metrics=['accuracy'])
# Train the model 
model_fit=model.fit(train_dataset,epochs=10,validation_data=Test_dataset)
Epoch 1/10
27/27 [==============================] - 7s 249ms/step - loss: 4.4162 - accuracy: 0.6375 - val_loss: 0.3612 - val_accuracy: 0.8000
Epoch 2/10
27/27 [==============================] - 6s 233ms/step - loss: 0.7199 - accuracy: 0.7375 - val_loss: 0.2246 - val_accuracy: 0.9333
Epoch 3/10
27/27 [==============================] - 6s 237ms/step - loss: 0.5454 - accuracy: 0.8000 - val_loss: 0.1838 - val_accuracy: 0.9333
Epoch 4/10
27/27 [==============================] - 6s 240ms/step - loss: 0.3603 - accuracy: 0.8625 - val_loss: 0.1472 - val_accuracy: 1.0000
Epoch 5/10
27/27 [==============================] - 6s 239ms/step - loss: 0.4584 - accuracy: 0.9000 - val_loss: 0.4398 - val_accuracy: 0.8667
Epoch 6/10
27/27 [==============================] - 7s 248ms/step - loss: 0.3038 - accuracy: 0.9375 - val_loss: 0.5549 - val_accuracy: 0.8667
Epoch 7/10
27/27 [==============================] - 7s 266ms/step - loss: 0.2275 - accuracy: 0.9500 - val_loss: 0.5911 - val_accuracy: 0.9000
Epoch 8/10
27/27 [==============================] - 7s 269ms/step - loss: 0.2600 - accuracy: 0.9000 - val_loss: 0.4840 - val_accuracy: 0.8667
Epoch 9/10
27/27 [==============================] - 7s 277ms/step - loss: 0.0877 - accuracy: 0.9625 - val_loss: 0.2083 - val_accuracy: 0.9000
Epoch 10/10
27/27 [==============================] - 7s 272ms/step - loss: 0.0880 - accuracy: 0.9500 - val_loss: 0.3411 - val_accuracy: 0.9333
In [102]:
validation_dataset=validation.flow_from_directory('C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/others',target_size=(224,224),batch_size=3,class_mode='binary')
dir_path='C:/Users/MSI/M1_BDIA_Python pour Data Science/Data Augmentation Project/others'
y_pred=[]
for i in validation_dataset.filenames:
    img=image.load_img(dir_path+'//'+i,target_size=(224,224))
    plt.imshow(img)
    plt.show()
    x=image.img_to_array(img)
    x=np.expand_dims(x,axis=0)
    images=np.vstack([x])
    val=model.predict(images)
    y_pred.append(int(val))
    if val==0 :
        print("Dog")
    else:
        print("Tree")
Found 19 images belonging to 2 classes.
1/1 [==============================] - 0s 83ms/step
Dog
1/1 [==============================] - 0s 23ms/step
Dog
1/1 [==============================] - 0s 27ms/step
Dog
1/1 [==============================] - 0s 25ms/step
Tree
1/1 [==============================] - 0s 24ms/step
Dog
1/1 [==============================] - 0s 28ms/step
Dog
1/1 [==============================] - 0s 26ms/step
Dog
1/1 [==============================] - 0s 26ms/step
Dog
1/1 [==============================] - 0s 25ms/step
Dog
1/1 [==============================] - 0s 26ms/step
Dog
1/1 [==============================] - 0s 29ms/step
Tree
1/1 [==============================] - 0s 26ms/step
Dog
1/1 [==============================] - 0s 24ms/step
Tree
1/1 [==============================] - 0s 23ms/step
Dog
1/1 [==============================] - 0s 25ms/step
Tree
1/1 [==============================] - 0s 24ms/step
Tree
1/1 [==============================] - 0s 26ms/step
Tree
1/1 [==============================] - 0s 35ms/step
Dog
1/1 [==============================] - 0s 23ms/step
Dog
In [103]:
print("Observerd Classes : ",validation_dataset.classes)
print("Predicted Classes : ",y_pred)
print(classification_report(y_pred,validation_dataset.classes,target_names = ['Dogs (Class 0)','Trees (Class 1)']))
Observerd Classes :  [0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1]
Predicted Classes :  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0]
                 precision    recall  f1-score   support

 Dogs (Class 0)       1.00      0.71      0.83        14
Trees (Class 1)       0.56      1.00      0.71         5

       accuracy                           0.79        19
      macro avg       0.78      0.86      0.77        19
   weighted avg       0.88      0.79      0.80        19

In [ ]: